bitkeeper revision 1.116 (3e6bd5fanPF3nYb2uWLHO7p0PnxDhg)
author kaf24@labyrinth.cl.cam.ac.uk <kaf24@labyrinth.cl.cam.ac.uk>
Mon, 10 Mar 2003 00:02:02 +0000 (00:02 +0000)
committer kaf24@labyrinth.cl.cam.ac.uk <kaf24@labyrinth.cl.cam.ac.uk>
Mon, 10 Mar 2003 00:02:02 +0000 (00:02 +0000)
xl_block.c, blkdev.h, xen_block.c:
  Improved error handling for blkdev data accesses. Added sanity checking for read requests -- test if buffer modified.

xen/drivers/block/xen_block.c
xen/include/xeno/blkdev.h
xenolinux-2.4.21-pre4-sparse/arch/xeno/drivers/block/xl_block.c

index b72bb0640c63b04f58aac1bc50956403d633decd..4f30cdf801c9a17f93c1a714450703828c00d1a8 100644 (file)
 #include <xeno/keyhandler.h>
 #include <xeno/interrupt.h>
 #include <xeno/segment.h>
+#include <asm/domain_page.h> /* TEST_READ_VALIDITY */
 
 #if 1
+#define TEST_READ_VALIDITY
 #define DPRINTK(_f, _a...) printk( _f , ## _a )
 #else
 #define DPRINTK(_f, _a...) ((void)0)
@@ -198,6 +200,23 @@ static void end_block_io_op(struct buffer_head *bh, int uptodate)
     unsigned long flags;
     pending_req_t *pending_req = bh->pending_req;
 
+    /* An error fails the entire request. */
+    if ( !uptodate )
+    {
+        DPRINTK("Buffer not up-to-date at end of operation\n");
+        pending_req->status = 1;
+    }
+#ifdef TEST_READ_VALIDITY
+    else
+    {
+        unsigned long *buff = map_domain_mem(virt_to_phys(bh->b_data));
+        if ( (buff[  0] == 0xdeadbeef) &&
+             (buff[127] == 0xdeadbeef) )
+            printk("A really fucked buffer at %ld\n", bh->b_rsector);
+        unmap_domain_mem(buff);
+    }
+#endif
+
     unlock_buffer(pending_req->domain, 
                   virt_to_phys(bh->b_data), 
                   bh->b_size, 
@@ -206,7 +225,7 @@ static void end_block_io_op(struct buffer_head *bh, int uptodate)
     if ( atomic_dec_and_test(&pending_req->pendcnt) )
     {
         make_response(pending_req->domain, pending_req->id,
-                      pending_req->operation, uptodate ? 0 : 1);
+                      pending_req->operation, pending_req->status);
         spin_lock_irqsave(&pend_prod_lock, flags);
         pending_ring[pending_prod] = pending_req - pending_reqs;
         PENDREQ_IDX_INC(pending_prod);
@@ -572,6 +591,7 @@ static void dispatch_rw_block_io(struct task_struct *p, int index)
     pending_req->domain    = p;
     pending_req->id        = req->id;
     pending_req->operation = operation;
+    pending_req->status    = 0;
     atomic_set(&pending_req->pendcnt, nr_psegs);
 
     /* Now we pass each segment down to the real blkdev layer. */
@@ -595,6 +615,12 @@ static void dispatch_rw_block_io(struct task_struct *p, int index)
         } 
         else
         {
+#ifdef TEST_READ_VALIDITY
+            unsigned long *buff = map_domain_mem(phys_seg[i].buffer);
+            buff[  0] = 0xdeadbeef;
+            buff[127] = 0xdeadbeef;
+            unmap_domain_mem(buff);
+#endif
             bh->b_state = (1 << BH_Mapped) | (1 << BH_Read);
         }
 
index 7a6a6844dd1e1853f20d7cc9b90d58ff714ad456..5ab2ba300478af107cb06b065d424b30a7bc41c2 100644 (file)
@@ -20,6 +20,7 @@ typedef struct {
     unsigned long       id;
     atomic_t            pendcnt;
     unsigned short      operation;
+    unsigned short      status;
 } pending_req_t;
 
 extern kdev_t xendev_to_physdev(unsigned short xendev);
index 40f93cc251d7e7c87b0395df807d56e00bced283..bfdfa50c3ead4843b793d6386e478936f31c8cd8 100644 (file)
@@ -440,13 +440,15 @@ static void xlblk_response_int(int irq, void *dev_id, struct pt_regs *ptregs)
        {
         case XEN_BLOCK_READ:
         case XEN_BLOCK_WRITE:
+            if ( bret->status )
+                printk(KERN_ALERT "Bad return from blkdev data request\n");
             for ( bh = (struct buffer_head *)bret->id; 
                   bh != NULL; 
                   bh = next_bh )
             {
                 next_bh = bh->b_reqnext;
                 bh->b_reqnext = NULL;
-                bh->b_end_io(bh, 1);
+                bh->b_end_io(bh, !bret->status);
             }
            break;
            
@@ -454,6 +456,8 @@ static void xlblk_response_int(int irq, void *dev_id, struct pt_regs *ptregs)
         case XEN_BLOCK_SEG_DELETE:
         case XEN_BLOCK_PROBE_SEG:
         case XEN_BLOCK_PROBE_BLK:
+            if ( bret->status )
+                printk(KERN_ALERT "Bad return from blkdev control request\n");
             xlblk_control_msg_pending = 0;
             break;
          
@@ -488,7 +492,7 @@ int xenolinux_control_msg(int operation, char *buffer, int size)
     char *aligned_buf;
 
     /* We copy from an aligned buffer, as interface needs sector alignment. */
-    aligned_buf = get_free_page(GFP_KERNEL);
+    aligned_buf = (char *)get_free_page(GFP_KERNEL);
     if ( aligned_buf == NULL ) BUG();
 
     xlblk_control_msg_pending = 1;
@@ -501,7 +505,7 @@ int xenolinux_control_msg(int operation, char *buffer, int size)
     while ( xlblk_control_msg_pending ) barrier();
 
     memcpy(buffer, aligned_buf, size);
-    free_page(aligned_buf);
+    free_page((unsigned long)aligned_buf);
     
     return 0;
 }